package org.apache.lucene.index;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;

/**
 *  Merges segments of approximately equal size, subject to
 *  an allowed number of segments per tier.  This is similar
 *  to {@link LogByteSizeMergePolicy}, except this merge
 *  policy is able to merge non-adjacent segments, and
 *  separates how many segments are merged at once ({@link
 *  #setMaxMergeAtOnce}) from how many segments are allowed
 *  per tier ({@link #setSegmentsPerTier}).  This merge
 *  policy also does not over-merge (i.e. cascade merges).
 *
 *  <p>For normal merging, this policy first computes a
 *  "budget" of how many segments are allowed to be in the
 *  index.  If the index is over-budget, then the policy
 *  sorts segments by decreasing size (pro-rating by percent
 *  deletes), and then finds the least-cost merge.  Merge
 *  cost is measured by a combination of the "skew" of the
 *  merge (size of the largest segment divided by the total
 *  merge size), total merge size and percent deletes
 *  reclaimed, so that merges with lower skew, smaller size,
 *  and more reclaimed deletes are favored.
 *
 *  <p>If a merge will produce a segment that's larger than
 *  {@link #setMaxMergedSegmentMB}, then the policy will
 *  merge fewer segments (down to 1 at once, if that one has
 *  deletions) to keep the segment size under budget.
 *
 *  <p><b>NOTE</b>: this policy freely merges non-adjacent
 *  segments; if this is a problem, use {@link
 *  LogMergePolicy}.
 *
 *  <p><b>NOTE</b>: This policy always merges by byte size
 *  of the segments, always pro-rates by percent deletes,
 *  and does not apply any maximum segment size during
 *  forceMerge (unlike {@link LogByteSizeMergePolicy}).
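 *
 *  <p>A minimal configuration sketch (the values and the
 *  {@code analyzer} variable are illustrative assumptions,
 *  not recommendations):
 *
 *  <pre class="prettyprint">
 *  TieredMergePolicy tmp = new TieredMergePolicy();
 *  tmp.setMaxMergedSegmentMB(5 * 1024.0);   // ~5 GB cap for normal merges
 *  tmp.setSegmentsPerTier(10.0);            // keep this &gt;= maxMergeAtOnce
 *  tmp.setMaxMergeAtOnce(10);
 *
 *  IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
 *  iwc.setMergePolicy(tmp);
 *  </pre>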
 *
 *  @lucene.experimental
 */

// TODO
//   - we could try to take into account whether a large
//     merge is already running (under CMS) and then bias
//     ourselves towards picking smaller merges if so (or,
//     maybe CMS should do so)

public class TieredMergePolicy extends MergePolicy {
  /** Default noCFSRatio.  If a merge's size is {@code >= 10%} of
   *  the index, then we disable compound file for it.
   *  @see MergePolicy#setNoCFSRatio */
  public static final double DEFAULT_NO_CFS_RATIO = 0.1;

  private int maxMergeAtOnce = 10;
  private long maxMergedSegmentBytes = 5*1024*1024*1024L;
  private int maxMergeAtOnceExplicit = 30;

  private long floorSegmentBytes = 2*1024*1024L;
  private double segsPerTier = 10.0;
  private double forceMergeDeletesPctAllowed = 10.0;
  private double reclaimDeletesWeight = 2.0;

  /** Sole constructor, setting all settings to their
   *  defaults. */
  public TieredMergePolicy() {
    super(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE);
  }

  /** Maximum number of segments to be merged at a time
   *  during "normal" merging.  For explicit merging (eg,
   *  forceMerge or forceMergeDeletes was called), see {@link
   *  #setMaxMergeAtOnceExplicit}.  Default is 10. */
  public TieredMergePolicy setMaxMergeAtOnce(int v) {
    if (v < 2) {
      throw new IllegalArgumentException("maxMergeAtOnce must be > 1 (got " + v + ")");
    }
    maxMergeAtOnce = v;
    return this;
  }

  /** Returns the current maxMergeAtOnce setting.
   *
   * @see #setMaxMergeAtOnce */
  public int getMaxMergeAtOnce() {
    return maxMergeAtOnce;
  }

  // TODO: should addIndexes do explicit merging, too?  And,
  // if user calls IW.maybeMerge "explicitly"

  /** Maximum number of segments to be merged at a time,
   *  during forceMerge or forceMergeDeletes. Default is 30. */
  public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) {
    if (v < 2) {
      throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")");
    }
    maxMergeAtOnceExplicit = v;
    return this;
  }

  /** Returns the current maxMergeAtOnceExplicit setting.
   *
   * @see #setMaxMergeAtOnceExplicit */
  public int getMaxMergeAtOnceExplicit() {
    return maxMergeAtOnceExplicit;
  }

  /** Maximum sized segment to produce during
   *  normal merging.  This setting is approximate: the
   *  estimate of the merged segment size is made by summing
   *  sizes of to-be-merged segments (compensating for
   *  percent deleted docs).  Default is 5 GB. */
  public TieredMergePolicy setMaxMergedSegmentMB(double v) {
    if (v < 0.0) {
      throw new IllegalArgumentException("maxMergedSegmentMB must be >= 0 (got " + v + ")");
    }
    v *= 1024 * 1024;
    maxMergedSegmentBytes = v > Long.MAX_VALUE ? Long.MAX_VALUE : (long) v;
    return this;
  }

  /** Returns the current maxMergedSegmentMB setting.
   *
   * @see #setMaxMergedSegmentMB */
  public double getMaxMergedSegmentMB() {
    return maxMergedSegmentBytes/1024/1024.;
  }

  /** Controls how aggressively merges that reclaim more
   *  deletions are favored.  Higher values will more
   *  aggressively target merges that reclaim deletions, but
   *  be careful not to go so high that way too much merging
   *  takes place; a value of 3.0 is probably nearly too
   *  high.  A value of 0.0 means deletions don't impact
   *  merge selection. */
  public TieredMergePolicy setReclaimDeletesWeight(double v) {
    if (v < 0.0) {
      throw new IllegalArgumentException("reclaimDeletesWeight must be >= 0.0 (got " + v + ")");
    }
    reclaimDeletesWeight = v;
    return this;
  }

  /** See {@link #setReclaimDeletesWeight}. */
  public double getReclaimDeletesWeight() {
    return reclaimDeletesWeight;
  }

  /** Segments smaller than this are "rounded up" to this
   *  size, ie treated as equal (floor) size for merge
   *  selection.  This is to prevent frequent flushing of
   *  tiny segments from allowing a long tail in the index.
   *  Default is 2 MB. */
  public TieredMergePolicy setFloorSegmentMB(double v) {
    if (v <= 0.0) {
      throw new IllegalArgumentException("floorSegmentMB must be > 0.0 (got " + v + ")");
    }
    v *= 1024 * 1024;
    floorSegmentBytes = v > Long.MAX_VALUE ? Long.MAX_VALUE : (long) v;
    return this;
  }

  /** Returns the current floorSegmentMB.
   *
   *  @see #setFloorSegmentMB */
  public double getFloorSegmentMB() {
    return floorSegmentBytes/(1024*1024.);
  }

  /** When forceMergeDeletes is called, we only merge away a
   *  segment if its delete percentage is over this
   *  threshold.  Default is 10%. */
  public TieredMergePolicy setForceMergeDeletesPctAllowed(double v) {
    if (v < 0.0 || v > 100.0) {
      throw new IllegalArgumentException("forceMergeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + v + ")");
    }
    forceMergeDeletesPctAllowed = v;
    return this;
  }

  /** Returns the current forceMergeDeletesPctAllowed setting.
   *
   * @see #setForceMergeDeletesPctAllowed */
  public double getForceMergeDeletesPctAllowed() {
    return forceMergeDeletesPctAllowed;
  }

  /** Sets the allowed number of segments per tier.  Smaller
   *  values mean more merging but fewer segments.
   *
   *  <p><b>NOTE</b>: this value should be {@code >=} {@link
   *  #setMaxMergeAtOnce}, otherwise you'll force too much
   *  merging to occur.</p>
   *
   *  <p>Default is 10.0.</p> */
  public TieredMergePolicy setSegmentsPerTier(double v) {
    if (v < 2.0) {
      throw new IllegalArgumentException("segmentsPerTier must be >= 2.0 (got " + v + ")");
    }
    segsPerTier = v;
    return this;
  }

  /** Returns the current segmentsPerTier setting.
   *
   * @see #setSegmentsPerTier */
  public double getSegmentsPerTier() {
    return segsPerTier;
  }

  private class SegmentByteSizeDescending implements Comparator<SegmentCommitInfo> {

    private final IndexWriter writer;

    SegmentByteSizeDescending(IndexWriter writer) {
      this.writer = writer;
    }

    @Override
    public int compare(SegmentCommitInfo o1, SegmentCommitInfo o2) {
      try {
        final long sz1 = size(o1, writer);
        final long sz2 = size(o2, writer);
        if (sz1 > sz2) {
          return -1;
        } else if (sz2 > sz1) {
          return 1;
        } else {
          return o1.info.name.compareTo(o2.info.name);
        }
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
    }
  }

  /** Holds score and explanation for a single candidate
   *  merge. */
  protected static abstract class MergeScore {
    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected MergeScore() {
    }

    /** Returns the score for this merge candidate; lower
     *  scores are better. */
    abstract double getScore();

    /** Human readable explanation of how the merge got this
     *  score. */
    abstract String getExplanation();
  }

  @Override
  public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos, IndexWriter writer) throws IOException {
    if (verbose(writer)) {
      message("findMerges: " + infos.size() + " segments", writer);
    }
    if (infos.size() == 0) {
      return null;
    }
    final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
    final Collection<SegmentCommitInfo> toBeMerged = new HashSet<>();

    final List<SegmentCommitInfo> infosSorted = new ArrayList<>(infos.asList());
    Collections.sort(infosSorted, new SegmentByteSizeDescending(writer));

    // Compute total index bytes & print details about the index
    long totIndexBytes = 0;
    long minSegmentBytes = Long.MAX_VALUE;
    for(SegmentCommitInfo info : infosSorted) {
      final long segBytes = size(info, writer);
      if (verbose(writer)) {
        String extra = merging.contains(info) ? " [merging]" : "";
        if (segBytes >= maxMergedSegmentBytes/2.0) {
          extra += " [skip: too large]";
        } else if (segBytes < floorSegmentBytes) {
          extra += " [floored]";
        }
        message("  seg=" + writer.segString(info) + " size=" + String.format(Locale.ROOT, "%.3f", segBytes/1024/1024.) + " MB" + extra, writer);
      }

      minSegmentBytes = Math.min(segBytes, minSegmentBytes);
      // Accum total byte size
      totIndexBytes += segBytes;
    }

    // If we have too-large segments, exclude them from the
    // allowed segment count:
    int tooBigCount = 0;
    while (tooBigCount < infosSorted.size()) {
      long segBytes = size(infosSorted.get(tooBigCount), writer);
      if (segBytes < maxMergedSegmentBytes/2.0) {
        break;
      }
      totIndexBytes -= segBytes;
      tooBigCount++;
    }

    minSegmentBytes = floorSize(minSegmentBytes);

    // Compute max allowed segs in the index
    long levelSize = minSegmentBytes;
    long bytesLeft = totIndexBytes;
    double allowedSegCount = 0;
    while(true) {
      final double segCountLevel = bytesLeft / (double) levelSize;
      if (segCountLevel < segsPerTier) {
        allowedSegCount += Math.ceil(segCountLevel);
        break;
      }
      allowedSegCount += segsPerTier;
      bytesLeft -= segsPerTier * levelSize;
      levelSize *= maxMergeAtOnce;
    }
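    // Worked example (illustrative numbers, not computed here): with a
    // floored minSegmentBytes of 2 MB, segsPerTier=10 and maxMergeAtOnce=10,
    // a 2560 MB index allows 10 segments at the 2 MB level (20 MB), 10 at
    // the 20 MB level (200 MB), 10 at the 200 MB level (2000 MB), and
    // ceil(340/2000) = 1 more at the 2000 MB level: 31 segments in total.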
    int allowedSegCountInt = (int) allowedSegCount;

    MergeSpecification spec = null;

    // Cycle to possibly select more than one merge:
    while(true) {

      long mergingBytes = 0;

      // Gather eligible segments for merging, ie segments
      // not already being merged and not already picked (by
      // prior iteration of this loop) for merging:
      final List<SegmentCommitInfo> eligible = new ArrayList<>();
      for(int idx = tooBigCount; idx < infosSorted.size(); idx++) {
        final SegmentCommitInfo info = infosSorted.get(idx);
        if (merging.contains(info)) {
          mergingBytes += size(info, writer);
        } else if (!toBeMerged.contains(info)) {
          eligible.add(info);
        }
      }

      final boolean maxMergeIsRunning = mergingBytes >= maxMergedSegmentBytes;

      if (verbose(writer)) {
        message("  allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.size() + " (eligible count=" + eligible.size() + ") tooBigCount=" + tooBigCount, writer);
      }

      if (eligible.size() == 0) {
        return spec;
      }

      if (eligible.size() > allowedSegCountInt) {

        // OK we are over budget -- find best merge!
        MergeScore bestScore = null;
        List<SegmentCommitInfo> best = null;
        boolean bestTooLarge = false;
        long bestMergeBytes = 0;

        // Consider all merge starts:
        for(int startIdx = 0; startIdx <= eligible.size()-maxMergeAtOnce; startIdx++) {

          long totAfterMergeBytes = 0;

          final List<SegmentCommitInfo> candidate = new ArrayList<>();
          boolean hitTooLarge = false;
          for(int idx = startIdx; idx < eligible.size() && candidate.size() < maxMergeAtOnce; idx++) {
            final SegmentCommitInfo info = eligible.get(idx);
            final long segBytes = size(info, writer);

            if (totAfterMergeBytes + segBytes > maxMergedSegmentBytes) {
              hitTooLarge = true;
              // NOTE: we continue, so that we can try
              // "packing" smaller segments into this merge
              // to see if we can get closer to the max
              // size; this in general is not perfect since
              // this is really "bin packing" and we'd have
              // to try different permutations.
              continue;
            }
            candidate.add(info);
            totAfterMergeBytes += segBytes;
          }

          // We should never see an empty candidate: we iterated over maxMergeAtOnce
          // segments, and already pre-excluded the too-large segments:
          assert candidate.size() > 0;

          final MergeScore score = score(candidate, hitTooLarge, mergingBytes, writer);
          if (verbose(writer)) {
            message("  maybe=" + writer.segString(candidate) + " score=" + score.getScore() + " " + score.getExplanation() + " tooLarge=" + hitTooLarge + " size=" + String.format(Locale.ROOT, "%.3f MB", totAfterMergeBytes/1024./1024.), writer);
          }

          // If we are already running a max sized merge
          // (maxMergeIsRunning), don't allow another max
          // sized merge to kick off:
          if ((bestScore == null || score.getScore() < bestScore.getScore()) && (!hitTooLarge || !maxMergeIsRunning)) {
            best = candidate;
            bestScore = score;
            bestTooLarge = hitTooLarge;
            bestMergeBytes = totAfterMergeBytes;
          }
        }

        if (best != null) {
          if (spec == null) {
            spec = new MergeSpecification();
          }
          final OneMerge merge = new OneMerge(best);
          spec.add(merge);
          for(SegmentCommitInfo info : merge.segments) {
            toBeMerged.add(info);
          }

          if (verbose(writer)) {
            message("  add merge=" + writer.segString(merge.segments) + " size=" + String.format(Locale.ROOT, "%.3f MB", bestMergeBytes/1024./1024.) + " score=" + String.format(Locale.ROOT, "%.3f", bestScore.getScore()) + " " + bestScore.getExplanation() + (bestTooLarge ? " [max merge]" : ""), writer);
          }
        } else {
          return spec;
        }
      } else {
        return spec;
      }
    }
  }

  /** Expert: scores one merge; subclasses can override. */
  protected MergeScore score(List<SegmentCommitInfo> candidate, boolean hitTooLarge, long mergingBytes, IndexWriter writer) throws IOException {
    long totBeforeMergeBytes = 0;
    long totAfterMergeBytes = 0;
    long totAfterMergeBytesFloored = 0;
    for(SegmentCommitInfo info : candidate) {
      final long segBytes = size(info, writer);
      totAfterMergeBytes += segBytes;
      totAfterMergeBytesFloored += floorSize(segBytes);
      totBeforeMergeBytes += info.sizeInBytes();
    }

    // Roughly measure "skew" of the merge, i.e. how
    // "balanced" the merge is (whether the segments are
    // about the same size), which can range from
    // 1.0/numSegsBeingMerged (good) to 1.0 (poor). Heavily
    // lopsided merges (skew near 1.0) are no good; they mean
    // O(N^2) merge cost over time:
    final double skew;
    if (hitTooLarge) {
      // Pretend the merge has perfect skew; skew doesn't
      // matter in this case because this merge will not
      // "cascade" and so it cannot lead to N^2 merge cost
      // over time:
      skew = 1.0/maxMergeAtOnce;
    } else {
      skew = ((double) floorSize(size(candidate.get(0), writer)))/totAfterMergeBytesFloored;
    }
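
    // For example (illustrative sizes): merging ten 50 MB segments gives
    // skew 50/500 = 0.1 (balanced), while one 450 MB segment plus nine
    // 5 MB segments gives skew 450/495 ~= 0.91 (badly lopsided).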

    // Strongly favor merges with less skew (smaller
    // mergeScore is better):
    double mergeScore = skew;

    // Gently favor smaller merges over bigger ones.  We
    // don't want to make this exponent too large else we
    // can end up doing poor merges of small segments in
    // order to avoid the large merges:
    mergeScore *= Math.pow(totAfterMergeBytes, 0.05);

    // Strongly favor merges that reclaim deletes:
    final double nonDelRatio = ((double) totAfterMergeBytes)/totBeforeMergeBytes;
    mergeScore *= Math.pow(nonDelRatio, reclaimDeletesWeight);
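
    // Net effect (as computed above):
    //   mergeScore = skew * totAfterMergeBytes^0.05 * nonDelRatio^reclaimDeletesWeight
    // For instance (illustrative numbers): skew=0.1, a 500 MB merged size
    // (~2.72 once raised to the 0.05 power) and nonDelRatio=0.9 with the
    // default weight 2.0 (0.81) give roughly 0.1 * 2.72 * 0.81 ~= 0.22.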

    final double finalMergeScore = mergeScore;

    return new MergeScore() {

      @Override
      public double getScore() {
        return finalMergeScore;
      }

      @Override
      public String getExplanation() {
        return "skew=" + String.format(Locale.ROOT, "%.3f", skew) + " nonDelRatio=" + String.format(Locale.ROOT, "%.3f", nonDelRatio);
      }
    };
  }

  @Override
  public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
    if (verbose(writer)) {
      message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.segString(infos) + " segmentsToMerge=" + segmentsToMerge, writer);
    }

    List<SegmentCommitInfo> eligible = new ArrayList<>();
    boolean forceMergeRunning = false;
    final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
    boolean segmentIsOriginal = false;
    for(SegmentCommitInfo info : infos) {
      final Boolean isOriginal = segmentsToMerge.get(info);
      if (isOriginal != null) {
        segmentIsOriginal = isOriginal;
        if (!merging.contains(info)) {
          eligible.add(info);
        } else {
          forceMergeRunning = true;
        }
      }
    }

    if (eligible.size() == 0) {
      return null;
    }

    if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) ||
        (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(infos, eligible.get(0), writer)))) {
      if (verbose(writer)) {
        message("already merged", writer);
      }
      return null;
    }

    Collections.sort(eligible, new SegmentByteSizeDescending(writer));

    if (verbose(writer)) {
      message("eligible=" + eligible, writer);
      message("forceMergeRunning=" + forceMergeRunning, writer);
    }

    int end = eligible.size();

    MergeSpecification spec = null;

    // Do full merges, first, backwards:
    while(end >= maxMergeAtOnceExplicit + maxSegmentCount - 1) {
      if (spec == null) {
        spec = new MergeSpecification();
      }
      final OneMerge merge = new OneMerge(eligible.subList(end-maxMergeAtOnceExplicit, end));
      if (verbose(writer)) {
        message("add merge=" + writer.segString(merge.segments), writer);
      }
      spec.add(merge);
      end -= maxMergeAtOnceExplicit;
    }
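
    // Example (illustrative): with maxMergeAtOnceExplicit=30 and
    // maxSegmentCount=1, 75 eligible segments yield merges over
    // [45,75) and [15,45); the remaining 15 segments are handled on
    // a later pass, via the final merge below, once those complete.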

    if (spec == null && !forceMergeRunning) {
      // Do final merge
      final int numToMerge = end - maxSegmentCount + 1;
      final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end));
      if (verbose(writer)) {
        message("add final merge=" + merge.segString(), writer);
      }
      spec = new MergeSpecification();
      spec.add(merge);
    }

    return spec;
  }

  @Override
  public MergeSpecification findForcedDeletesMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
    if (verbose(writer)) {
      message("findForcedDeletesMerges infos=" + writer.segString(infos) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed, writer);
    }
    final List<SegmentCommitInfo> eligible = new ArrayList<>();
    final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
    for(SegmentCommitInfo info : infos) {
      double pctDeletes = 100.*((double) writer.numDeletedDocs(info))/info.info.maxDoc();
      if (pctDeletes > forceMergeDeletesPctAllowed && !merging.contains(info)) {
        eligible.add(info);
      }
    }

    if (eligible.size() == 0) {
      return null;
    }

    Collections.sort(eligible, new SegmentByteSizeDescending(writer));

    if (verbose(writer)) {
      message("eligible=" + eligible, writer);
    }

    int start = 0;
    MergeSpecification spec = null;

    while(start < eligible.size()) {
      // Don't enforce max merged size here: app is explicitly
      // calling forceMergeDeletes, and knows this may take a
      // long time / produce big segments (like forceMerge):
      final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size());
      if (spec == null) {
        spec = new MergeSpecification();
      }

      final OneMerge merge = new OneMerge(eligible.subList(start, end));
      if (verbose(writer)) {
        message("add merge=" + writer.segString(merge.segments), writer);
      }
      spec.add(merge);
      start = end;
    }

    return spec;
  }

  private long floorSize(long bytes) {
    return Math.max(floorSegmentBytes, bytes);
  }

  private boolean verbose(IndexWriter writer) {
    return writer != null && writer.infoStream.isEnabled("TMP");
  }

  private void message(String message, IndexWriter writer) {
    writer.infoStream.message("TMP", message);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("[" + getClass().getSimpleName() + ": ");
    sb.append("maxMergeAtOnce=").append(maxMergeAtOnce).append(", ");
    sb.append("maxMergeAtOnceExplicit=").append(maxMergeAtOnceExplicit).append(", ");
    sb.append("maxMergedSegmentMB=").append(maxMergedSegmentBytes/1024/1024.).append(", ");
    sb.append("floorSegmentMB=").append(floorSegmentBytes/1024/1024.).append(", ");
    sb.append("forceMergeDeletesPctAllowed=").append(forceMergeDeletesPctAllowed).append(", ");
    sb.append("segmentsPerTier=").append(segsPerTier).append(", ");
    sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", ");
    sb.append("noCFSRatio=").append(noCFSRatio).append("]");
    return sb.toString();
  }
}